In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
In [4]:
MNIST = input_data.read_data_sets("mnist", one_hot=True)
In [5]:
MNIST
Out[5]:
In [6]:
learning_rate = 0.01
batch_size = 128
n_epochs = 25
Dimensions: in X·W, X has one row per picture in the batch and one column per pixel (784). Y has the same number of rows as the batch size and one column per class (10). A quick shape check follows the placeholder cell below.
In [24]:
X = tf.placeholder(tf.float32, [batch_size, 784])
Y = tf.placeholder(tf.float32, [batch_size, 10])
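A minimal numpy shape check (illustrative only; the array contents are random and the names here are made up): a [batch_size, 784] batch times a [784, 10] weight matrix gives [batch_size, 10] logits, one row of class scores per image.
In [ ]:
import numpy as np

fake_batch = np.random.rand(batch_size, 784)   # stands in for a batch of flattened 28x28 images
fake_W = np.random.rand(784, 10)               # same shape as the weights created below
fake_b = np.zeros((1, 10))                     # one bias per class, broadcast over the batch rows
print((fake_batch @ fake_W + fake_b).shape)    # -> (128, 10)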
In [25]:
# Create variables for the weights and biases
# W multiplies X from the right, so it has one row per pixel (784) and one column per class (10)
# b holds one bias per class
W = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name="weights")
b = tf.Variable(tf.zeros([1, 10]), name="bias")
In [26]:
logits = tf.matmul(X, W) + b
In [27]:
entropy = tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits)
loss = tf.reduce_mean(entropy) # Mean for all samples in minibatch
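As a sanity check on what the loss above computes, here is a hedged numpy sketch (my own helper, not TensorFlow internals): take the softmax of each row of logits, then the negative log-probability of the true class, and average over the batch. Evaluated on the same numbers, it should match loss up to numerical precision.
In [ ]:
import numpy as np

def manual_softmax_cross_entropy(logits_np, labels_np):
    # shift each row by its max so the exponentials do not overflow
    shifted = logits_np - logits_np.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    # per-sample cross-entropy: -sum_k y_k * log(p_k), then the batch mean
    per_sample = -(labels_np * np.log(probs)).sum(axis=1)
    return per_sample.mean()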
In [28]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
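For intuition, the cell below sketches roughly what minimize(loss) does in a single step: compute the gradients of the loss with respect to W and b, then subtract learning_rate times each gradient. This is illustrative only and is not used by the training loop; the optimizer op above is what actually runs.
In [ ]:
# Illustrative only: a hand-written gradient-descent step, equivalent in spirit
# to the optimizer op above
grad_W, grad_b = tf.gradients(loss, [W, b])
manual_step = tf.group(W.assign_sub(learning_rate * grad_W),
                       b.assign_sub(learning_rate * grad_b))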
In [29]:
init = tf.global_variables_initializer()
In [41]:
with tf.Session() as sess:
    sess.run(init)
    n_batches = int(MNIST.train.num_examples / batch_size)
    for i in range(n_epochs):
        total_correct = 0  # reset every epoch so the accuracy below is per-epoch
        for _ in range(n_batches):
            X_batch, Y_batch = MNIST.train.next_batch(batch_size)
            _, loss_batch, logits_batch = sess.run([optimizer, loss, logits],
                                                   feed_dict={X: X_batch, Y: Y_batch})
            # argmax of the logits equals argmax of their softmax, so correct
            # predictions can be counted directly in numpy (building tf ops here
            # would add new nodes to the graph on every iteration)
            total_correct += (logits_batch.argmax(axis=1) == Y_batch.argmax(axis=1)).sum()
        accuracy = total_correct / (n_batches * batch_size)
        print('Epoch', i + 1, 'loss:', loss_batch, 'accuracy:', accuracy)
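The accuracy above is measured on the training batches; a hedged sketch for the held-out test set follows. The lines below are meant to be appended at the end of the with tf.Session() block above, while sess is still open. Because the placeholders are fixed to batch_size rows, the test set is also fed in batches of 128 and the few leftover samples are simply skipped here.
In [ ]:
# Sketch: test-set evaluation, to be appended inside the session block above
n_test_batches = int(MNIST.test.num_examples / batch_size)
test_correct = 0
for _ in range(n_test_batches):
    X_batch, Y_batch = MNIST.test.next_batch(batch_size)
    logits_batch = sess.run(logits, feed_dict={X: X_batch})
    test_correct += (logits_batch.argmax(axis=1) == Y_batch.argmax(axis=1)).sum()
print('Test accuracy:', test_correct / (n_test_batches * batch_size))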
In [ ]: